From d3747dd57a1666f182c4ddc8e75c7fe83892edc0 Mon Sep 17 00:00:00 2001 From: "djm@kirby.fc.hp.com" Date: Thu, 29 Sep 2005 13:16:40 -0600 Subject: [PATCH] Minor code restructure in vcpu_translate (prep for more later) --- xen/arch/ia64/asm-offsets.c | 1 + xen/arch/ia64/xen/process.c | 4 +-- xen/arch/ia64/xen/vcpu.c | 72 +++++++++++++++++-------------------- xen/include/asm-ia64/vcpu.h | 2 +- 4 files changed, 37 insertions(+), 42 deletions(-) diff --git a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c index 8c1dfe6d6c..c28d762aba 100644 --- a/xen/arch/ia64/asm-offsets.c +++ b/xen/arch/ia64/asm-offsets.c @@ -65,6 +65,7 @@ void foo(void) DEFINE(XSI_INCOMPL_REG_OFS, offsetof(mapped_regs_t, incomplete_regframe)); DEFINE(XSI_PEND_OFS, offsetof(mapped_regs_t, pending_interruption)); DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0])); + DEFINE(XSI_IHA_OFS, offsetof(mapped_regs_t, iha)); DEFINE(XSI_TPR_OFS, offsetof(mapped_regs_t, tpr)); DEFINE(XSI_PTA_OFS, offsetof(mapped_regs_t, pta)); DEFINE(XSI_ITV_OFS, offsetof(mapped_regs_t, itv)); diff --git a/xen/arch/ia64/xen/process.c b/xen/arch/ia64/xen/process.c index 0e975b668f..9d92f3aca5 100644 --- a/xen/arch/ia64/xen/process.c +++ b/xen/arch/ia64/xen/process.c @@ -275,7 +275,7 @@ int handle_lazy_cover(struct vcpu *v, unsigned long isr, struct pt_regs *regs) void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir) { - unsigned long iip = regs->cr_iip; + unsigned long iip = regs->cr_iip, iha; // FIXME should validate address here unsigned long pteval; unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL); @@ -294,7 +294,7 @@ void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_reg return; } - fault = vcpu_translate(current,address,is_data,&pteval,&itir); + fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha); if (fault == IA64_NO_FAULT) { pteval = translate_domain_pte(pteval,address,itir); diff --git 
a/xen/arch/ia64/xen/vcpu.c b/xen/arch/ia64/xen/vcpu.c index 44c13b1eb6..14627008a6 100644 --- a/xen/arch/ia64/xen/vcpu.c +++ b/xen/arch/ia64/xen/vcpu.c @@ -1354,9 +1354,9 @@ IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr) unsigned long vhpt_translate_count = 0; -IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir) +IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha) { - unsigned long pta, pta_mask, iha, pte, ps; + unsigned long pta, pta_mask, pte, ps; TR_ENTRY *trp; ia64_rr rr; @@ -1398,51 +1398,45 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pt /* check guest VHPT */ pta = PSCB(vcpu,pta); rr.rrval = PSCB(vcpu,rrs)[address>>61]; - if (rr.ve && (pta & IA64_PTA_VE)) - { - if (pta & IA64_PTA_VF) - { - /* long format VHPT - not implemented */ - return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR); - } - else - { - /* short format VHPT */ - - /* avoid recursively walking VHPT */ - pta_mask = (itir_mask(pta) << 3) >> 3; - if (((address ^ pta) & pta_mask) == 0) - return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR); - - vcpu_thash(vcpu, address, &iha); - if (__copy_from_user(&pte, (void *)iha, sizeof(pte)) != 0) - return IA64_VHPT_FAULT; - - /* - * Optimisation: this VHPT walker aborts on not-present pages - * instead of inserting a not-present translation, this allows - * vectoring directly to the miss handler. - */ - if (pte & _PAGE_P) - { - *pteval = pte; - *itir = vcpu_get_itir_on_fault(vcpu,address); - vhpt_translate_count++; - return IA64_NO_FAULT; - } - return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR); - } + if (!rr.ve || !(pta & IA64_PTA_VE)) + return (is_data ? IA64_ALT_DATA_TLB_VECTOR : + IA64_ALT_INST_TLB_VECTOR); + if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */ + // thash won't work right?
+ panic_domain(vcpu_regs(vcpu),"can't do long format VHPT\n"); + //return (is_data ? IA64_DATA_TLB_VECTOR:IA64_INST_TLB_VECTOR); + } + + /* avoid recursively walking (short format) VHPT */ + pta_mask = (itir_mask(pta) << 3) >> 3; + if (((address ^ pta) & pta_mask) == 0) + return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR); + + vcpu_thash(vcpu, address, iha); + if (__copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0) + return IA64_VHPT_FAULT; + + /* + * Optimisation: this VHPT walker aborts on not-present pages + * instead of inserting a not-present translation, this allows + * vectoring directly to the miss handler. + */ + if (pte & _PAGE_P) { + *pteval = pte; + *itir = vcpu_get_itir_on_fault(vcpu,address); + vhpt_translate_count++; + return IA64_NO_FAULT; } - return (is_data ? IA64_ALT_DATA_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR); + return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR); } IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr) { - UINT64 pteval, itir, mask; + UINT64 pteval, itir, mask, iha; IA64FAULT fault; in_tpa = 1; - fault = vcpu_translate(vcpu, vadr, 1, &pteval, &itir); + fault = vcpu_translate(vcpu, vadr, 1, &pteval, &itir, &iha); in_tpa = 0; if (fault == IA64_NO_FAULT) { diff --git a/xen/include/asm-ia64/vcpu.h b/xen/include/asm-ia64/vcpu.h index f18bf651ac..812e91cae2 100644 --- a/xen/include/asm-ia64/vcpu.h +++ b/xen/include/asm-ia64/vcpu.h @@ -133,7 +133,7 @@ extern IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range); extern IA64FAULT vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 addr_range); extern IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr, UINT64 addr_range); extern IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr, UINT64 addr_range); -extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir); +extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha); extern IA64FAULT vcpu_tpa(VCPU *vcpu, 
UINT64 vadr, UINT64 *padr); /* misc */ extern IA64FAULT vcpu_rfi(VCPU *vcpu); -- 2.30.2